The given dataset comprises images from 12 plant species. The objective is to create a classifier capable of determining a plant's species from a photo.
%tensorflow_version 2.x
import tensorflow as tf
tf.__version__
# Initialize the random number generator
import random
random.seed(0)
# Ignore the warnings
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from glob import glob
import os
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from tqdm import tqdm
import time
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from keras.utils import np_utils
from keras.preprocessing import image
from keras.models import Sequential, model_from_json, Model
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Dropout, Flatten, Dense ,Input
from keras.optimizers import Adam ,RMSprop
from google.colab import drive
drive.mount('/content/drive/')
# Adding path to the folder where dataset files are
# Adding path to the folder where dataset files are
project_path = '/content/drive/My Drive/AIML/Projects/CV1/'
train_data_path = project_path + 'Plant Seedling Classification Data/train'

# Walk the train directory: each sub-folder is one species, and every PNG
# inside it is one training sample labelled with that folder's name.
images = []
classes = []
for species_dir in os.listdir(train_data_path):
    species_path = os.path.join(train_data_path, species_dir)
    for png_path in glob(os.path.join(species_path, "*.png")):
        # OpenCV reads images in BGR channel order.
        images.append(cv2.imread(png_path, cv2.IMREAD_COLOR))
        classes.append(species_dir)
print('Total Number of images :', len(images))
print('The shape of a sample image :', images[0].shape)
# Convert to numpy arrays so boolean masks can be used for per-class counts.
images = np.array(images)
classes = np.array(classes)

# Count how many images each species contributes.
labels = []
values = []
for label in set(classes):
    labels.append(label)
    values.append(len(images[classes == label]))

fig, ax = plt.subplots(figsize=(28, 7))
ax.bar(labels, values)
ax.set_xlabel("Species")
ax.set_ylabel("Number of images")
fig.suptitle('Class Distribution of the dataset')
# Show four sample images side by side, then the pixel-intensity histogram
# of each (raw values over the 0-255 range).
fig, axs = plt.subplots(1, 4, figsize=(15, 15))
for ax, idx in zip(axs, (0, 20, 30, 80)):
    ax.imshow(images[idx])
for idx in (0, 20, 30, 80):
    plt.hist(images[idx].ravel(), 256, [0, 255])
    plt.show()
Inference from Histograms and Sample Images:
Preprocessing data and arriving at Train/Validation split
def load_dataset(path):
    """Load image file paths and one-hot targets from a class-per-folder tree.

    sklearn's load_files treats each sub-folder of `path` as one class;
    the integer targets are one-hot encoded for the 12 plant species.
    """
    data = load_files(path)
    files = np.array(data['filenames'])
    targets = np_utils.to_categorical(np.array(data['target']), 12)
    return files, targets
print('Loading Train Files and Targets')
train_files, train_targets = load_dataset(train_data_path)
print('Loading Complete!')
print('There are %d training plant images.' % len(train_files))
# List of plant (species) names, taken from the class sub-folder names.
# NOTE: the original sliced a hard-coded character offset (item[83:-1]),
# which silently breaks if the Drive path changes length; extracting the
# basename of each directory is path-independent.
plant_names = [os.path.basename(os.path.normpath(item))
               for item in sorted(glob(train_data_path + "/*/"))]
print('There are %d total plant categories.' % len(plant_names))
plant_names
def path_to_tensor(img_path):
    """Read one image file and return it as a (1, 256, 256, 3) float tensor."""
    # PIL load + resize to the fixed 256x256 network input size.
    img = image.load_img(img_path, target_size=(256, 256))
    # (256, 256, 3) array, then prepend a batch axis -> (1, 256, 256, 3).
    return np.expand_dims(image.img_to_array(img), axis=0)
def paths_to_tensor(img_paths):
    """Stack every image in `img_paths` into one (N, 256, 256, 3) tensor."""
    return np.vstack([path_to_tensor(p) for p in tqdm(img_paths)])
# Pre-process for Keras: load every training image and scale pixel values
# from [0, 255] into [0, 1].
train_tensors = paths_to_tensor(train_files).astype('float32') / 255
train_tensors.shape

# Same four sample positions as before, now from the normalised tensors.
fig, axs = plt.subplots(1, 4, figsize=(15, 15))
for ax, idx in zip(axs, (0, 20, 30, 80)):
    ax.imshow(train_tensors[idx])

# Histogram of one raw grayscale image (0-255 range) ...
img = cv2.imread(train_data_path + "/Maize/ff4b55219.png", 0)
plt.hist(img.ravel(), 256, [0, 256])
plt.show()
# ... versus the first normalised tensor (0-1 range).
plt.hist(train_tensors[0].ravel(), 256, [0, 1])
plt.show()
# Fix the RNG so the split is reproducible.
seed = 20
np.random.seed(seed)
# Hold out 15% of the training tensors as a validation set.
(train_tensors, val_train,
 train_targets, val_targets) = train_test_split(train_tensors,
                                                train_targets,
                                                test_size=0.15,
                                                random_state=seed)
for part in (train_tensors, val_train, train_targets, val_targets):
    print(part.shape)
1.2.a. Training Image Classifier Model using Neural Network
# ---- Baseline fully-connected (ANN) classifier ----
# Flatten each 256x256x3 image into a single 196,608-dim vector.
# Deriving both dimensions from the data (instead of hard-coding
# 4051 x 196608) keeps this working for any train/validation split size.
X = np.asarray(train_tensors).reshape(len(train_tensors), -1)

# Initialize the Artificial Neural Network classifier.
model = Sequential()
# Input layer: 512 ReLU units; He initialisation suits ReLU activations.
model.add(Dense(512, kernel_initializer='he_normal', activation='relu',
                input_shape=(X.shape[1],)))
# Four progressively narrower hidden layers.
model.add(Dense(256, kernel_initializer='he_normal', activation='relu'))
model.add(Dense(128, kernel_initializer='he_normal', activation='relu'))
model.add(Dense(64, kernel_initializer='he_normal', activation='relu'))
model.add(Dense(32, kernel_initializer='he_normal', activation='relu'))
# Output layer: one softmax unit per plant species (12 classes).
model.add(Dense(12, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X, train_targets, batch_size=128, epochs=40, verbose=1, validation_split=0.1, shuffle=True)
1.2.b. Training Image Classifier Model using CNN
# ---- CNN classifier: four conv blocks widening from 16 to 128 filters ----
model = Sequential()
model.add(Conv2D(16, (5, 5), padding='Same', activation='relu',
                 input_shape=(256, 256, 3)))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.1))
# Each remaining block: two same-padded conv layers, a 2x2 max-pool,
# and 10% dropout.
for n_filters, k in ((32, 5), (64, 3), (128, 3)):
    model.add(Conv2D(n_filters, (k, k), padding='Same', activation='relu'))
    model.add(Conv2D(n_filters, (k, k), padding='Same', activation='relu'))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Dropout(0.1))
# Global average pooling instead of Flatten keeps the dense head small.
model.add(GlobalAveragePooling2D())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(12, activation='softmax'))
model.summary()
# RMSprop optimiser; the checkpoint keeps only the best weights seen
# during training (save_best_only).
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
checkpointer = ModelCheckpoint(
    filepath=project_path + 'saved_models/cnn_model_wo_aug_af.hdf5',
    verbose=1, save_best_only=True)
history = model.fit(train_tensors, train_targets, epochs=20,
                    validation_data=(val_train, val_targets),
                    callbacks=[checkpointer], batch_size=128)
# Halve the learning rate whenever validation accuracy plateaus for 3 epochs.
# NOTE: with metrics=['accuracy'] TF2/Keras logs the metric as
# 'val_accuracy' (not the old 'val_acc'); monitoring the wrong name makes
# this callback a silent no-op.
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
                                            patience=3,
                                            verbose=1,
                                            factor=0.5,
                                            min_lr=0.0001)
# Light geometric augmentation only: small rotations, zooms and shifts.
# Flips and all normalisation/whitening options are deliberately disabled.
datagen = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=10,        # degrees
    zoom_range=0.1,
    width_shift_range=0.1,    # fraction of total width
    height_shift_range=0.1,   # fraction of total height
    horizontal_flip=False,
    vertical_flip=False)
datagen.fit(train_tensors)
batch = 32
checkpointer = ModelCheckpoint(
    filepath=project_path + 'saved_models/cnn_model_with_aug_af.hdf5',
    verbose=1, save_best_only=True)
# Model.fit accepts generators directly in TF2; fit_generator is deprecated
# and removed in recent releases.
history = model.fit(datagen.flow(train_tensors, train_targets, batch_size=batch),
                    epochs=10, validation_data=(val_train, val_targets),
                    verbose=1, steps_per_epoch=train_tensors.shape[0] // batch,
                    callbacks=[learning_rate_reduction, checkpointer])
# Restore the best checkpoint before scoring on the validation set.
model.load_weights(project_path + 'saved_models/cnn_model_with_aug_af.hdf5')
accuracy = model.evaluate(x=val_train, y=val_targets, batch_size=32)
print("Loss of the model: {} , Accuracy of Model :{} ".format(accuracy[0], accuracy[1]))
def one_hot_to_dense(labels_one_hot):
    """Convert a one-hot encoded label matrix to a 1-D array of class indices.

    Parameters
    ----------
    labels_one_hot : np.ndarray of shape (num_samples, num_classes)
        One-hot encoded labels.

    Returns
    -------
    np.ndarray of shape (num_samples,)
        The column index of the maximum entry in each row.
    """
    # argmax is the idiomatic inverse of one-hot encoding; unlike the
    # original exact `== 1` comparison it also tolerates float rounding,
    # and the unused num_labels/num_classes locals are dropped.
    return np.argmax(labels_one_hot, axis=1)
# Sequential.predict_classes was removed in TF >= 2.6; taking argmax over
# the softmax output is the equivalent, forward-compatible form.
validation_predictions = np.argmax(model.predict(val_train), axis=1)
report = classification_report(one_hot_to_dense(val_targets), validation_predictions)
print(report)
# The 12 plant species paired with the short abbreviations used on the
# confusion-matrix axes below.
_species_abbrevs = [
    ('Black-grass', 'BG'), ('Charlock', 'Ch'), ('Cleavers', 'Cl'),
    ('Common Chickweed', 'CC'), ('Common wheat', 'CW'), ('Fat Hen', 'FH'),
    ('Loose Silky-bent', 'LSB'), ('Maize', 'M'), ('Scentless Mayweed', 'SM'),
    ('Shepherds Purse', 'SP'), ('Small-flowered Cranesbill', 'SFC'),
    ('Sugar beet', 'SB'),
]
species = [name for name, _ in _species_abbrevs]
abbreviation = [abbr for _, abbr in _species_abbrevs]
num_species = len(species)
pd.DataFrame({'class': species, 'abbreviation': abbreviation})
# Confusion matrix over the validation set, annotated with abbreviated
# species names on both axes, saved to disk at 300 dpi.
conf_mat = confusion_matrix(one_hot_to_dense(val_targets), validation_predictions)
fig, ax = plt.subplots(1, figsize=(10, 10))
ax = sns.heatmap(conf_mat, ax=ax, cmap=plt.cm.BuGn, annot=True)
ax.set_xticklabels(abbreviation)
ax.set_yticklabels(abbreviation)
plt.xlabel('Predicted class')
plt.ylabel('True class')
plt.title('Confusion Matrix')
fig.savefig('Confusion matrix.png', dpi=300)
plt.show()
AIML Image classifiers have been trained using Neural Network and CNN models separately. The accuracy of the neural network model did not improve beyond a point even though the number of epochs was increased. The accuracy of the CNN model is far better than that of the Neural Network model, and it improved further when data augmentation techniques were used for image preprocessing. The CNN model is therefore chosen as the best-performing model and used for predicting the class of a given image.
Steps:
# Persist the chosen CNN: architecture as JSON, weights as HDF5.
with open(project_path + "saved_models/model_after_rev.json", "w") as json_file:
    json_file.write(model.to_json())
model.save_weights(project_path + "saved_models/model_after_rev.h5")
print("Saved model to disk")
def plant_seed_classification(image):
    """Classify one plant-seedling image with the saved CNN and print the result.

    Parameters
    ----------
    image : str
        Path to the image file to classify.

    Returns
    -------
    list of str
        Predicted species name(s) for the (single-image) batch.
    """
    # Index -> species name for the 12 seedling classes (training order).
    INV_CLASS = {
        0: 'Black-grass',
        1: 'Charlock',
        2: 'Cleavers',
        3: 'Common Chickweed',
        4: 'Common wheat',
        5: 'Fat Hen',
        6: 'Loose Silky-bent',
        7: 'Maize',
        8: 'Scentless Mayweed',
        9: 'Shepherds Purse',
        10: 'Small-flowered Cranesbill',
        11: 'Sugar beet'
    }
    # Same preprocessing as training: resize to 256x256 and scale to [0, 1].
    tensor = path_to_tensor(image).astype('float32') / 255
    # Reload architecture + weights so this works in a fresh session.
    with open(project_path + 'saved_models/model_after_rev.json', 'r') as json_file:
        model = model_from_json(json_file.read())
    model.load_weights(project_path + "saved_models/model_after_rev.h5")
    print("Loaded model from disk")
    # predict_classes was removed in TF >= 2.6; argmax over the softmax
    # output is the equivalent.
    prediction = np.argmax(model.predict(tensor), axis=1)
    print('The predicted Class of the image is {}'.format(prediction))
    names = [INV_CLASS[p] for p in prediction]
    print('The class of the plant is', names)
    return names
from google.colab.patches import cv2_imshow

# Show the image being classified, then run it through the saved model.
pred_image = cv2.imread(project_path + '/Prediction/Predict.png')
cv2_imshow(pred_image)
plant_seed_classification(project_path + '/Prediction/Predict.png')
Predictions Results: The given image for Prediction is processed by Plant Seedling Image classifier model and predicted as "Maize"
Supervised Learning Models: Supervised learning is a machine learning technique in which a model learns the association between inputs and ground-truth labels in a dataset. Supervised learning models work well for classification and regression problems. However, for image classification, neural networks provide better accuracy than classical supervised learning models such as Support Vector Machines (SVM) with feature extraction using PCA, or K-Nearest Neighbors (KNN).
Neural Networks: A simple neural network (NN) uses dense layers, and for image classification it generally outperforms classical supervised learning models. However, its accuracy may still not be very good. The performance of an NN can be improved by adding more layers, experimenting with different optimizer/regularization functions, increasing the number of epochs, and increasing the amount of data.
Convolutional Neural Networks: Convolution layers have proved to be very good at image classification. They allow parameter sharing, which results in a much more optimized network compared to using only Dense layers; training time is shorter and training loss is much better than with simple neural networks. Transfer Learning can also be used. Transfer learning is a method of reusing already acquired knowledge: the idea is to use a state-of-the-art model that has already been trained on a larger dataset for a long time and is proven to work well on a related task.
Building Car Image Classifier Dataset - Challenges:
Import and display car images in python :
img_data_path = project_path + ('/Cars Images/')


def load_dataset(path):
    """Load car image paths and 15-class one-hot targets (folder-per-class)."""
    data = load_files(path)
    car_files = np.array(data['filenames'])
    car_targets = np_utils.to_categorical(np.array(data['target']), 15)
    return car_files, car_targets


print('Loading car Files and Targets')
car_files, car_targets = load_dataset(img_data_path)
print('Loading Complete!')
print('There are %d car images.' % len(car_files))
# File names of the car images. The original sliced a hard-coded character
# offset (item[60:]), which breaks when the base path changes length;
# os.path.basename is path-independent.
carfile_names = [os.path.basename(item)
                 for item in sorted(glob(img_data_path + "cars/*.jpg"))]
print('There are %d car file names.' % len(carfile_names))
print(carfile_names)
def carpath_to_tensor(img_path):
    """Read one car image and return it as a (1, 256, 256, 3) float tensor."""
    # PIL load + resize to the fixed 256x256 input size, then add batch axis.
    img = image.load_img(img_path, target_size=(256, 256))
    return np.expand_dims(image.img_to_array(img), axis=0)
def carpaths_to_tensor(img_paths):
    """Stack every car image in `img_paths` into one (N, 256, 256, 3) tensor."""
    return np.vstack([carpath_to_tensor(p) for p in tqdm(img_paths)])
# Pre-process the car data for Keras: scale pixel values into [0, 1].
car_tensors = carpaths_to_tensor(car_files).astype('float32') / 255
car_tensors.shape

# Display the car images on a 4x4 grid. Subplot positions are 1-based, so
# the loop starts at tensor index 1 — image 0 is never shown.
# NOTE(review): confirm whether skipping the first image is intentional.
fig = plt.figure(figsize=(8, 8))
for i in range(1, len(carfile_names)):
    fig.add_subplot(4, 4, i)
    plt.imshow(car_tensors[i], cmap='gray')
plt.show()
Car images collected and stored in car dataset are displayed as above
Objective is to create flower classifier capable of determining a flower’s species from a photo
4.1. EDA and Visualization
Importing Data
Analysing the dimensions of data
Preprocessing Data
Visualizing the data - Displaying images, labels, images vs labels, applying different filters
!pip install tflearn
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
# Importing the oxflower17 dataset (17 flower classes) via tflearn.
import tflearn.datasets.oxflower17 as oxflower17
X, Y = oxflower17.load_data(one_hot=True)
# Report the shape and dtype of the loaded arrays.
print("X data-- Shape :", X.shape, "and Data Type : ", X.dtype)
print("Y data-- Shape :", Y.shape, "and Data Type : ", Y.dtype)
# Visualizing the dataset
fig=plt.figure(figsize=(8,8))
columns=10
rows=10
for i in range(1, columns*rows+1):
img=X[i]
fig.add_subplot(rows,columns,i)
plt.imshow(img,cmap='gray')
plt.show()
# show an image and label in the dataset
plt.imshow(X[0],cmap='gray')
plt.show()
print('One hot encoded value: ', Y[0])
# visualizing the first 10 images in the dataset and their one hot encoded values
%matplotlib inline
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 1))
for i in range(10):
plt.subplot(1, 10, i+1)
plt.imshow(X[i],cmap='gray')
plt.axis('off')
plt.show()
print('One hot encoded values for each of the above image: \n %s' % (Y[0:10]))
train_data_path1 = project_path + 'Flowers-Classification/17flowers-train/jpg/'

# Walk the flower train directory: each sub-folder is one class, and every
# JPG inside it is one sample labelled with that folder's name.
images = []
classes = []
for class_folder_name in os.listdir(train_data_path1):
    class_folder_path = os.path.join(train_data_path1, class_folder_name)
    for image_path in glob(os.path.join(class_folder_path, "*.jpg")):
        images.append(cv2.imread(image_path, cv2.IMREAD_COLOR))
        classes.append(class_folder_name)
images = np.array(images)
classes = np.array(classes)

# Per-class image counts for the distribution plot.
values = []
labels = []
for label in set(classes):
    values.append(len(images[classes == label]))
    labels.append(label)
fig, ax = plt.subplots(figsize=(28, 7))
ax.bar(labels, values)
ax.set_xlabel("Labels")  # fixed typo: axis label previously read "Lables"
ax.set_ylabel("Number of images")
fig.suptitle('Class Distribution of the dataset')
print("labels:", labels)
# ---- Applying image filters to one sample flower image ----
# NOTE: the original bound this image to a variable named `image`, which
# shadowed the `keras.preprocessing.image` module imported at the top of
# the file and would break any later call to path_to_tensor(); the
# variable is renamed `sample_img`. Likewise `kernel` replaces `filter`,
# which shadowed the Python builtin.
from google.colab.patches import cv2_imshow
sample_img = cv2.imread(train_data_path1 + '16/image_1359.jpg')

# Blur: simple 4x4 box average.
blur_image = cv2.blur(sample_img, (4, 4))
print("Blurred Image:")
cv2_imshow(blur_image)

# Sharpen: centre-heavy 3x3 kernel (weights sum to 1).
kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
Sharpened_image = cv2.filter2D(sample_img, -1, kernel)
print("Sharpened Image:")
cv2_imshow(Sharpened_image)

# Emboss: vertical gradient kernel, offset by 128 toward mid-gray.
kernel = np.array([[0, 1, 0], [0, 0, 0], [0, -1, 0]])
emboss_img = cv2.filter2D(sample_img, -1, kernel)
emboss_img = emboss_img + 128
print("Embossed Image:")
cv2_imshow(emboss_img)

# Edge detection: 5x5 kernel with a strong positive centre.
kernel = np.array([[0, 0, -1, 0, 0], [0, -1, -2, -1, 0], [-1, -2, 16, -2, -1],
                   [0, -1, -2, -1, 0], [0, 0, -1, 0, 0]])
edge_detect_img = cv2.filter2D(sample_img, -1, kernel)
print("Image - Edge Detection:")
cv2_imshow(edge_detect_img)
4.2.a - Train tuning AIML image classifier model using CNN model and Testing
# ---- CNN classifier for the 17 flower classes (224x224 inputs) ----
model = Sequential()
model.add(Conv2D(16, (5, 5), padding='Same', activation='relu',
                 input_shape=(224, 224, 3)))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.1))
# Remaining blocks: two same-padded conv layers, 2x2 max-pool, 10% dropout.
for n_filters, k in ((32, 5), (64, 3), (128, 3)):
    model.add(Conv2D(n_filters, (k, k), padding='Same', activation='relu'))
    model.add(Conv2D(n_filters, (k, k), padding='Same', activation='relu'))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Dropout(0.1))
model.add(GlobalAveragePooling2D())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(17, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X, Y, batch_size=64, epochs=20, verbose=1, validation_split=0.1, shuffle=True)

# Score the model (note: evaluated on the same data it was trained on).
results = model.evaluate(X, Y)
print('Test_acc using CNN model : ', results[1])
print(model.metrics_names)
print(results)
4.2.b - Train tune AIML image classifier model using Neural Networks and Testing
# ---- Baseline fully-connected (ANN) classifier for the flowers ----
# Flatten each 224x224x3 image into a 150,528-dim vector. Deriving both
# dimensions from the data (instead of hard-coding 1360 x 150528) keeps
# this working if the dataset size changes.
X_NN = np.asarray(X).reshape(len(X), -1)

# Initialize the Artificial Neural Network classifier.
model = Sequential()
# Input layer: He initialisation suits ReLU activations.
model.add(Dense(512, kernel_initializer='he_normal', activation='relu',
                input_shape=(X_NN.shape[1],)))
# Four progressively narrower hidden layers.
model.add(Dense(256, kernel_initializer='he_normal', activation='relu'))
model.add(Dense(128, kernel_initializer='he_normal', activation='relu'))
model.add(Dense(64, kernel_initializer='he_normal', activation='relu'))
model.add(Dense(32, kernel_initializer='he_normal', activation='relu'))
# Output layer: one softmax unit per flower class (17 classes).
model.add(Dense(17, activation='softmax'))
model.summary()
# Compile and fit the ANN classifier.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_NN, Y, validation_split=0.1, batch_size=128, epochs=20, verbose=1)

# Score the model (note: evaluated on the same data it was trained on).
results = model.evaluate(X_NN, Y)
print('Test_acc using NN model : ', results[1])
print(model.metrics_names)
print(results)
4.2.c - Train tuning AIML image classifier model using CNN with Transfer learning model (AlexNet) and testing
# ---- AlexNet-style CNN for the 17 flower classes ----
model = Sequential()
model.add(Conv2D(96, kernel_size=(11, 11), strides=(4, 4), activation='relu',
                 input_shape=(224, 224, 3)))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(BatchNormalization())
model.add(Conv2D(256, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(BatchNormalization())
# Three stacked conv layers before the final pool.
model.add(Conv2D(256, kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(384, kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(384, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(BatchNormalization())
model.add(Flatten())
# Two wide tanh dense layers, each followed by 50% dropout.
for _ in range(2):
    model.add(Dense(4096, activation='tanh'))
    model.add(Dropout(0.5))
model.add(Dense(17, activation='softmax'))
model.summary()

from keras import optimizers
# Small learning rate so the large network learns slowly but stably.
adam = optimizers.Adam(lr=0.0001)
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
checkpointer = ModelCheckpoint(
    filepath=project_path + 'saved_models/oxyFlower_model_wo_aug_af.hdf5',
    verbose=1, save_best_only=True)
history = model.fit(X, Y, epochs=40, validation_split=0.1,
                    callbacks=[checkpointer], batch_size=64)

# Score the model (note: evaluated on the same data it was trained on).
results = model.evaluate(X, Y)
print('Test_acc using CNN with Transfer learning model (AlexNet) : ', results[1])
print(model.metrics_names)
print(results)
Saving the best performing model and using it for prediction
The image classifier model using CNN with Transfer learning model (AlexNet)has given superior results when compared to all other models used for training. Hence, this model is chosen as best performing model for flowers image classification
# Persist the AlexNet-style model: architecture as JSON, weights as HDF5.
with open(project_path + "saved_models/oxyFlower_model_after_rev.json", "w") as json_file:
    json_file.write(model.to_json())
model.save_weights(project_path + "saved_models/oxyFlower_model_after_rev.h5")
print("Saved model to disk")
GUI to predict class/label of Flower Image
Below code is executed in Jupyter Notebook separately as tkinter package cannot be executed in Google colab. The output screen shot is attached in the word file uploaded
!pip install tkintertable
!pip install tensorflow
!pip install keras
import tkinter as tk
from tkinter import filedialog
from tkinter import *
from PIL import ImageTk, Image
import numpy
# Dictionary labelling all the oxflower17 dataset classes. No readable
# names are available here, so each index 0..16 maps to its string form.
classes = {idx: str(idx) for idx in range(17)}
# Initialise the Tk window plus the two labels that will show the chosen
# image and the predicted class text.
top = tk.Tk()
top.geometry('800x600')
top.title('CLASSIFIER GUI-Great Learning')
top.configure(background='#CDCDCD')
label = Label(top, background='#CDCDCD', font=('arial', 15, 'bold'))
sign_image = Label(top)
def classify(file_path):
    """Run the loaded model on the chosen file and display the predicted label."""
    global label_packed
    # NOTE(review): the image is resized to 32x32 here while the saved
    # flower models above take 224x224 inputs — confirm the expected size.
    img = Image.open(file_path)
    img = img.resize((32, 32))
    batch = numpy.expand_dims(img, axis=0)
    batch = numpy.array(batch)
    pred = model.predict_classes([batch])[0]
    sign = classes[pred]
    print(sign)
    label.configure(foreground='#011638', text=sign)
def show_classify_button(file_path):
    """Place a Predict button that classifies the currently chosen file."""
    classify_b = Button(top, text="Predict",
                        command=lambda: classify(file_path), padx=10, pady=5)
    classify_b.configure(background='#364156', foreground='white',
                         font=('arial', 10, 'bold'))
    classify_b.place(relx=0.79, rely=0.46)
def upload_image():
    """Let the user pick an image, preview it, and reveal the Predict button."""
    try:
        file_path = filedialog.askopenfilename()
        uploaded = Image.open(file_path)
        # Fit the preview into roughly half the window.
        uploaded.thumbnail(((top.winfo_width() / 2.25),
                            (top.winfo_height() / 2.25)))
        im = ImageTk.PhotoImage(uploaded)
        sign_image.configure(image=im)
        sign_image.image = im  # keep a reference so Tk doesn't GC the photo
        label.configure(text='')
        show_classify_button(file_path)
    except Exception:
        # Best-effort: a cancelled dialog or unreadable file leaves the UI
        # unchanged. The original bare `except:` also swallowed SystemExit
        # and KeyboardInterrupt, which should propagate.
        pass
# Wire up the static widgets and start the Tk event loop.
upload = Button(top, text="Import Data", command=upload_image, padx=10, pady=5)
upload.configure(background='#364156', foreground='white',
                 font=('arial', 10, 'bold'))
upload.pack(side=BOTTOM, pady=50)
sign_image.pack(side=BOTTOM, expand=True)
label.pack(side=BOTTOM, expand=True)
heading = Label(top, text="CLASSIFIER GUI-Great Learning", pady=20,
                font=('arial', 20, 'bold'))
heading.configure(background='#CDCDCD', foreground='#364156')
heading.pack()
top.mainloop()
# Load the trained flower model from a local Windows path for use by classify().
# NOTE(review): in this flattened listing these lines appear after
# top.mainloop(); in the original notebook they presumably ran before the
# GUI was started — confirm execution order when reusing this script.
from tensorflow.python.keras.models import load_model
model = load_model("D:/anaconda/ashok/Project/CV1/Flower_GUI/oxyFlower_model_after_rev.h5")
After deploying AIML Image Classifier model, monitoring its performance and how users interact with its functionality will give an idea on what new data should be collected or what additional processing should be implemented to rebuild and to redeploy a more performing model.
Image Data Streaming pipeline (camera feeds) can be arranged by using AWS cloud service. Node.js CLI script can be implemented which fork a subprocess for each camera and IPC channels can be used to track and maintain each feed. This pipeline will take less time to implement. The image data can be stored using the Amazon Simple Storage Service (S3), a high-performance storage service that provides data access through a web interface. Then, Model performance should be monitored with the new data. A small production monitoring team can be deployed to monitor & fix issues related to data streaming, model performance and application processing
Part-1: Plant seedling Image classifier models are created, trained and prediction is done using the best classifier model
Part-2: Shared the observations on how CNN outperforms other models
Part-3: Created Car dataset from scratch, highlighted the challenges in building dataset, Imported and displayed the images using python
Part-4: Flower Image classifier models are created, trained and prediction is done using the best classifier model
Part-5: Post Production - Maintenance & Support Strategy has been provided once AIML Image Classifiers are deployed in production